# NOTE(review): Adam(lr = 1e-4) could perhaps be replaced by plain 'Adam'
NUMGPU = 0
#--------------------------------------------------------------------------------------------------------------------------------------------
# GPU selection and TensorFlow v1-compat session setup.
# Must run before any op touches the GPU so the memory cap takes effect.
from tensorflow.keras import backend as K
import tensorflow as tf
import os
GPUs = [NUMGPU] # put here the GPU number to use: 0, 1 or 3 (2 is reserved for ZZ)
print("Setup environment to select appropriate GPUs.")
if not 'CUDA_VISIBLE_DEVICES' in os.environ:
    # Note that the args.gpuid will be ignored if the environment variable is set
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in GPUs])
# NOTE(review): the brackets wrap the value in a one-element list, so this
# prints e.g. "['0']" rather than "0" — cosmetic only.
print("Content of CUDA_VISIBLE_DEVICES: '%s'" % [os.environ["CUDA_VISIBLE_DEVICES"]])
# /// !!! \\\
print("Set up Tensorflow backend session.")
# Cap per-process GPU memory at ~1/3 so the card can be shared.
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
config.allow_soft_placement = False # Require GPUs
config.log_device_placement = False # Display devices used
config.gpu_options.allow_growth = True # Do not allocate all memory on startup
sess = tf.compat.v1.Session(config=config)
#from keras.backend.tensorflow_backend import set_session
tf.compat.v1.keras.backend.set_session(sess)
#K.set_session(sess)
# Compute environment initialization
print("Compute devices available:")
# We need to import this after selecting GPUs and capping memory usage
from tensorflow.python.client import device_lib
# NOTE: up to TF 1.4, this allocates all available RAM unless we initialize
# a session properly before
for device in device_lib.list_local_devices():
    print(" - %s" % (device.name))
#--------------------------------------------------------------------------------------------------------------------------------------------
import nibabel as nib
import tensorflow as tf  # NOTE(review): duplicate import, harmless
print(tf.__version__)
#nbf64 = 32
nbf64 = 32 # base U-Net filter count; lower it if no NVIDIA GPU is available
TAILLEFINALE = 512
GENERER_IMAGES = False
R=3  # number of binary-erosion iterations applied to each vertebra mask
def fast_display(*img2dlst):
    """Display the given 2-D images on a single figure, at most 9 per row.

    Parameters
    ----------
    *img2dlst : 2-D arrays
        Images passed to ``plt.imshow`` in order.

    Relies on the module-level ``matplotlib.pyplot`` import (``plt``).
    """
    if not img2dlst:
        # Guard: the original divided by cols == 0 for an empty call.
        return
    plt.figure(figsize=(16, 8))
    nbimg = len(img2dlst)
    cols = min(9, nbimg)
    # Ceil division; the original "(nbimg // cols) + 1" allocated a spurious
    # empty row whenever nbimg was an exact multiple of cols.
    rows = (nbimg + cols - 1) // cols
    for ii, img2d in enumerate(img2dlst):
        plt.subplot(rows, cols, 1 + ii)
        plt.imshow(img2d)
    plt.show()
import tensorflow.keras.backend as K
# Define custom loss
def custom_loss(y_true, y_pred):
    """Soft symmetric-difference term plus element-wise binary cross entropy.

    Returns ``2 * sum(y_true + y_pred - 2*y_true*y_pred) + BCE(y_true, y_pred)``.
    The scalar sum broadcasts against the element-wise BCE tensor, exactly as
    in the original.  The original defined an inner ``loss`` closure and then
    called it immediately (its comment falsely claimed a function was
    returned); the computation is now direct with an identical result.
    """
    return 2 * K.sum(y_true + y_pred - 2 * y_true * y_pred) + K.binary_crossentropy(y_true, y_pred, from_logits=False)
import os
# Scratch directory for the big numpy memmaps and output directory for the
# trained networks (here the same location).
DOSSIER_DONNEES_TEMP = '/lrde/home2/rdi-2022/tdala/my_tensorflow/venv/verse2019/tmp/'
DOSSIER_DONNEES_RESEAUX = '/lrde/home2/rdi-2022/tdala/my_tensorflow/venv/verse2019/tmp/'
# Folder holding the VerSe volumes: pairs of <name>.nii.gz (image) and
# <name>_seg.nii.gz (label map).
DossierDesVertebres = '/lrde/home2/rdi-2022/tdala/my_tensorflow/venv/verse2019/[DATA_VERSE_ANTHONY]/'
os.chdir(DossierDesVertebres)
#SAVEDIRECTORY = '/Users/thuad/Downloads/LRDE/ANTHONY/[TRAINING_DATA]/'
#if os.path.exists(SAVEDIRECTORY) == False:
#    os.mkdir(SAVEDIRECTORY)
import nibabel as nib
from nibabel.affines import apply_affine
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import cv2
import sklearn.preprocessing
import os
from scipy import ndimage
import cc3d
import scipy.linalg as lin
from tensorflow.keras.utils import *
import numpy.linalg as lin  # NOTE(review): rebinds 'lin' — the scipy.linalg import above is shadowed; 'lin' is numpy.linalg from here on
# Count the usable patients: one per image file without the "_seg" suffix.
nb_total_de_patients_disponibles = 0
for dirname, _, filenames in os.walk(DossierDesVertebres):
    for filename in filenames:
        if "nii.gz" in filename and "_seg.nii.gz" not in filename:
            nb_total_de_patients_disponibles += 1
print("nb_total_de_patients_disponibles = ",nb_total_de_patients_disponibles)
nb_patients_training = 64 # set to 64
nb_patients_validation = 8 # set to 8
nb_patients_test = 8 # set to 8
nbpatients_a_charger_au_plus = nb_patients_training + nb_patients_validation + nb_patients_test
assert(nbpatients_a_charger_au_plus <= nb_total_de_patients_disponibles)
# Minimum area below which vertebra images are discarded.
aire_minimum_de_vertebre = 100  # NOTE(review): defined but not used in this chunk
# Count the number of vertebrae the patients will yield, in order to size
# the memmap files before filling them.
nb_vertebres_training = 0
nb_vertebres_validation = 0
numpatient = 0
for dirname, _, filenames in os.walk(DossierDesVertebres):
    for filename in filenames:
        if "nii.gz" in filename and "_seg.nii.gz" not in filename:
            # The first nb_patients_training image files feed the training set.
            if numpatient < nb_patients_training:
                print("patient numero ",numpatient)
                nomseg = DossierDesVertebres+filename[:-7]+"_seg.nii.gz"
                seg = nib.load(nomseg).get_fdata()
                [sxseg,syseg,szseg] = seg.shape
                print("Dimensions [SEG] = ",[sxseg,syseg,szseg], " et intensite min = ",np.min(seg), " et intensite max = ",np.max(seg))
                # One count per label value actually present (min label = background).
                for i in range(int(np.min(seg)) + 1, int(np.max(seg)) + 1):
                    VT = np.where(seg == i,1,0)
                    if np.sum(VT) != 0:
                        nb_vertebres_training += 1
            # Validation patients sit just before the trailing test patients.
            if numpatient >= (nb_total_de_patients_disponibles - nb_patients_test - nb_patients_validation) and numpatient < (nb_total_de_patients_disponibles - nb_patients_test):
                print("patient numero ",numpatient)
                nomseg = DossierDesVertebres+filename[:-7]+"_seg.nii.gz"
                seg = nib.load(nomseg).get_fdata()
                [sxseg,syseg,szseg] = seg.shape
                print("Dimensions [SEG] = ",[sxseg,syseg,szseg], " et intensite min = ",np.min(seg), " et intensite max = ",np.max(seg))
                for i in range(int(np.min(seg)) + 1, int(np.max(seg)) + 1):
                    VT = np.where(seg == i,1,0)
                    if np.sum(VT) != 0:
                        nb_vertebres_validation += 1
            numpatient += 1
print("nb_vertebres_training = ",nb_vertebres_training)
print("nb_vertebres_validation = ",nb_vertebres_validation)
# Allocate the datasets as disk-backed numpy memmaps (the '.hdf5' extension is
# only a file name — these are raw memmaps, not HDF5 files).
# *48: each training vertebra is later augmented into 48 volumes.
xtrain_classification = np.memmap(DOSSIER_DONNEES_TEMP+'xtrain_classification.hdf5', dtype='float32', mode='w+', shape=(nb_vertebres_training*48,64,64,64,1))
ytrain_classification = np.memmap(DOSSIER_DONNEES_TEMP+'ytrain_classification.hdf5', dtype='float32', mode='w+', shape=(nb_vertebres_training*48))
xvalidation_classification = np.memmap(DOSSIER_DONNEES_TEMP+'xvalidation_classification.hdf5', dtype='float32', mode='w+', shape=(nb_vertebres_validation,64,64,64,1))
yvalidation_classification = np.memmap(DOSSIER_DONNEES_TEMP+'yvalidation_classification.hdf5', dtype='float32', mode='w+', shape=(nb_vertebres_validation))
print(xtrain_classification.shape)
def f_vertebre(X_prime, A, vec_propre):
    """Sample the current binary vertebra mask at a principal-axis-frame point.

    Maps ``X_prime`` (coordinates in the eigenvector frame) back to voxel
    space via ``vec_propre @ X_prime + A`` and returns 1 when that voxel is
    set in the mask, 0 otherwise (including out-of-bounds points).

    NOTE(review): relies on module globals ``VT`` (binary mask) and
    ``sxseg``/``syseg``/``szseg`` (its dimensions), which the caller loop must
    have set — confirm before reusing elsewhere.
    """
    coordinate = np.round(vec_propre @ X_prime + A).astype(int)
    # Single combined bounds check replaces the original pair of if-chains
    # (which also carried stray C-style semicolons).
    if np.any(coordinate < 0):
        return 0
    if coordinate[0] >= sxseg or coordinate[1] >= syseg or coordinate[2] >= szseg:
        return 0
    return 1 if VT[coordinate[0], coordinate[1], coordinate[2]] == 1 else 0
def crop_from_the_web(mask):
    """Return the tight bounding-box crop of the non-zero voxels of *mask*."""
    occupied = np.nonzero(mask)
    window = tuple(slice(int(axis_idx.min()), int(axis_idx.max()) + 1)
                   for axis_idx in occupied)
    return mask[window]
def aligne_avec_tenseur(image):
    """Reorient a binary vertebra volume along its inertia principal axes and
    resample it on a cubic grid of edge ``taille_input_classification``.

    Relies on module globals: ``taille_input_classification`` (output edge
    length), ``COEFF`` (sampling half-range, in units of the largest standard
    deviation) and ``fast_display`` for visual checks.
    """
    print("avant réorientation")
    # Show the three central slices before reorientation.
    fast_display(image[:,:,image.shape[2]//2],image[:,image.shape[1]//2,:],image[image.shape[0]//2,:,:])
    # We assume a 3-D array containing a single vertebra.
    dim = image.shape
    vertebre = image
    dim = vertebre.shape
    card = np.sum(vertebre)  # NOTE(review): computed but never used
    grid = np.indices(dim)
    gridx = grid[0]
    gridy = grid[1]
    gridz = grid[2]
    zonea1 = (vertebre > 0)  # boolean mask of the occupied voxels
    # print("nombre de 1 = ",np.sum(zonea1))
    # Centre of mass of the voxel cloud.
    x_masse = np.mean(gridx[zonea1].reshape((-1,)))
    y_masse = np.mean(gridy[zonea1].reshape((-1,)))
    z_masse = np.mean(gridz[zonea1].reshape((-1,)))
    # Second-order central moments -> symmetric inertia (covariance) tensor.
    a11 = np.mean((gridx[zonea1] - x_masse)**2)
    a22 = np.mean((gridy[zonea1] - y_masse)**2)
    a33 = np.mean((gridz[zonea1] - z_masse)**2)
    a12 = np.mean((gridx[zonea1] - x_masse)*(gridy[zonea1] - y_masse))
    a13 = np.mean((gridx[zonea1] - x_masse)*(gridz[zonea1] - z_masse))
    a23 = np.mean((gridy[zonea1] - y_masse)*(gridz[zonea1] - z_masse))
    tenseur = [[a11,a12,a13],[a12,a22,a23],[a13,a23,a33]]
    # print("tenseur = ",tenseur)
    # NOTE(review): 'lin' is numpy.linalg here (last import wins); eig on this
    # symmetric matrix may still return a complex dtype — eigh would guarantee
    # real eigenvalues.  Confirm downstream tolerance of complex sigmas.
    output_LA = lin.eig(tenseur)
    # print("output_LA = ",output_LA)
    eigenValues,eigenVectors = output_LA
    # Sort eigenpairs by decreasing eigenvalue.
    idx = eigenValues.argsort()[::-1]
    eigenValues = eigenValues[idx]
    eigenVectors = eigenVectors[:,idx]
    sigma1 = np.sqrt(eigenValues[0])
    sigma2 = np.sqrt(eigenValues[1])
    sigma3 = np.sqrt(eigenValues[2])
    SIGMAMAX = np.max([sigma1,sigma2,sigma3])
    #https://www.math.ubc.ca/~pwalls/math-python/linear-algebra/eigenvalues-eigenvectors/
    vec1 = eigenVectors[:,0].reshape(3,1)
    vec2 = eigenVectors[:,1].reshape(3,1)
    vec3 = eigenVectors[:,2].reshape(3,1)
    vertebre_reorientee = np.zeros((taille_input_classification,taille_input_classification,taille_input_classification))
    # Sample positions along each principal axis: +/- COEFF * SIGMAMAX.
    vals_alpha1 = np.linspace(-COEFF*SIGMAMAX,COEFF*SIGMAMAX,taille_input_classification)
    vals_alpha2 = np.linspace(-COEFF*SIGMAMAX,COEFF*SIGMAMAX,taille_input_classification)
    vals_alpha3 = np.linspace(-COEFF*SIGMAMAX,COEFF*SIGMAMAX,taille_input_classification)
    # Nearest-neighbour resampling of the volume in the principal-axis frame.
    for cpt1 in range(taille_input_classification):
        alpha1 = vals_alpha1[cpt1]
        for cpt2 in range(taille_input_classification):
            alpha2 = vals_alpha2[cpt2]
            for cpt3 in range(taille_input_classification):
                alpha3 = vals_alpha3[cpt3]
                xround = np.round(x_masse + alpha1*vec1[0] + alpha2*vec2[0] + alpha3*vec3[0]).astype(int)
                yround = np.round(y_masse + alpha1*vec1[1] + alpha2*vec2[1] + alpha3*vec3[1]).astype(int)
                zround = np.round(z_masse + alpha1*vec1[2] + alpha2*vec2[2] + alpha3*vec3[2]).astype(int)
                # Keep only sample points that fall inside the input volume;
                # outside points leave the output voxel at 0.
                x_bornes_ok = np.logical_and(xround >= 0,xround < dim[0])
                y_bornes_ok = np.logical_and(yround >= 0,yround < dim[1])
                z_bornes_ok = np.logical_and(zround >= 0,zround < dim[2])
                bornes_ok = x_bornes_ok and y_bornes_ok and z_bornes_ok
                if bornes_ok:
                    vertebre_reorientee[cpt1,cpt2,cpt3] = vertebre[xround,yround,zround]
    print("après réorientation")
    fast_display(vertebre_reorientee[:,:,taille_input_classification//2],vertebre_reorientee[:,taille_input_classification//2,:],vertebre_reorientee[taille_input_classification//2,:,:])
    return vertebre_reorientee
# Counters and constants for filling the memmap datasets allocated above.
num_vertebres_training = 0    # write cursor into the training memmaps
num_vertebres_validation = 0  # write cursor into the validation memmaps
taille_input_classification = 64  # cubic edge length of the network input
nbmaxvertebres = 25  # NOTE(review): not used in this chunk
numpatient = 0
num_vertebre = 1
COEFF = 3  # sampling half-range, in units of the largest standard deviation
# ---------------------------------------------------------------------------
# Fill the training and validation memmaps.
# Each training vertebra yields 48 augmented volumes: the reoriented volume
# itself, then for each of the 6 axis transpositions the 7 non-trivial axis
# flips (plus, for the 5 non-identity transpositions, the unflipped transposed
# volume): 1 + 7 + 5*8 = 48.  The original spelled all 48 stores out by hand;
# the loops below emit the exact same volumes in the exact same order.
# ---------------------------------------------------------------------------
# All 6 axis permutations of a 3-D volume; identity first.
TRANSPOSITIONS = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
# The 7 non-trivial flip-axis combinations, in the original emission order.
FLIPS = [(0,), (1,), (0, 1), (2,), (0, 2), (1, 2), (0, 1, 2)]
for dirname, _, filenames in os.walk(DossierDesVertebres):
    for filename in filenames:
        if "nii.gz" in filename and "_seg.nii.gz" not in filename:
            if numpatient < nb_patients_training:
                print(filename)
                print("patient numero ",numpatient)
                nomseg = DossierDesVertebres+filename[:-7]+"_seg.nii.gz"
                GT = nib.load(nomseg).get_fdata()
                [sxseg,syseg,szseg] = GT.shape
                # Labels run from min+1 to max: the minimum label is background.
                for i in range(int(np.min(GT)) + 1, int(np.max(GT)) + 1):
                    print(i)
                    VT = np.where(GT == float(i),1,0)
                    VT = ndimage.binary_erosion(VT,iterations = R)
                    if np.sum(VT) != 0:
                        cropped = crop_from_the_web(VT)
                        new = aligne_avec_tenseur(cropped)
                        num_vertebre = i
                        # Base volume (identity orientation, no flip).
                        xtrain_classification[num_vertebres_training,:,:,:,0] = new
                        ytrain_classification[num_vertebres_training] = num_vertebre
                        num_vertebres_training += 1
                        for perm in TRANSPOSITIONS:
                            new_transposee = np.transpose(new, perm)
                            # The identity transposition without a flip would
                            # duplicate the base volume, so it is skipped.
                            variants = [] if perm == (0, 1, 2) else [new_transposee]
                            variants.extend(np.flip(new_transposee, codeflip) for codeflip in FLIPS)
                            for variant in variants:
                                xtrain_classification[num_vertebres_training,:,:,:,0] = variant
                                ytrain_classification[num_vertebres_training] = num_vertebre
                                num_vertebres_training += 1
                        print("Numéro de vertèbre", num_vertebre)
                        print("Numéro vertèbre training", num_vertebres_training)
            if numpatient >= (nb_total_de_patients_disponibles - nb_patients_test - nb_patients_validation) and numpatient < (nb_total_de_patients_disponibles - nb_patients_test):
                print("patient numero ",numpatient)
                nomseg = DossierDesVertebres+filename[:-7]+"_seg.nii.gz"
                GT = nib.load(nomseg).get_fdata()
                [sxseg,syseg,szseg] = GT.shape
                for i in range(int(np.min(GT)) + 1, int(np.max(GT)) + 1):
                    VT = np.where(GT == i, i, 0)
                    VT = ndimage.binary_erosion(VT,iterations=R)
                    if np.sum(VT) != 0:
                        # Inertia (covariance) tensor of the voxel cloud.
                        im = np.array(np.where(VT==True))
                        bar=np.array(im.mean(1))
                        coord = im.T- bar
                        tenseur = coord.T@coord/coord.shape[0]
                        val_propre, vec_propre = lin.eig(tenseur)
                        # Sort eigenpairs by decreasing (real) eigenvalue, as the
                        # training path does.  The original's single pairwise pass
                        # did not fully sort 3 values and never permuted
                        # val_propre alongside the eigenvector columns.
                        idx = np.real(val_propre).argsort()[::-1]
                        val_propre = val_propre[idx]
                        vec_propre = vec_propre[:, idx]
                        new = np.zeros((64,64,64))
                        sigma = np.sqrt(np.max(np.real(val_propre)))
                        intervalle_entier = np.arange(64)
                        # Nearest-neighbour resampling of the mask on a 64^3 grid
                        # spanning +/- 3 sigma along each principal axis.
                        for lambda1_entier in intervalle_entier:
                            for lambda2_entier in intervalle_entier:
                                for lambda3_entier in intervalle_entier:
                                    lambda1 = -3*sigma + 2*3*sigma*lambda1_entier/63
                                    lambda2 = -3*sigma + 2*3*sigma*lambda2_entier/63
                                    lambda3 = -3*sigma + 2*3*sigma*lambda3_entier/63
                                    X_prime = [lambda1, lambda2, lambda3]
                                    new[lambda1_entier, lambda2_entier, lambda3_entier] = f_vertebre(X_prime, bar, vec_propre)
                        num_vertebre = i
                        xvalidation_classification[num_vertebres_validation,:,:,:,0] = np.copy(new)
                        yvalidation_classification[num_vertebres_validation] = num_vertebre
                        num_vertebres_validation += 1
                        print("Numéro de vertèbre", num_vertebre)
            numpatient += 1
print("nb_vertebres_training = ",nb_vertebres_training)
print("nb_vertebres_validation = ",nb_vertebres_validation)
# Flush every memmap buffer to disk, drop the writable handles, then reopen
# the same files read-only for training (same shapes as at allocation time;
# *48 accounts for the per-vertebra augmentation factor).
np.memmap.flush(xtrain_classification)
del xtrain_classification
np.memmap.flush(ytrain_classification)
del ytrain_classification
np.memmap.flush(xvalidation_classification)
del xvalidation_classification
np.memmap.flush(yvalidation_classification)
del yvalidation_classification
xtrain_classification = np.memmap(DOSSIER_DONNEES_TEMP+'xtrain_classification.hdf5', dtype='float32', mode='r', shape=(nb_vertebres_training*48,64,64,64,1))
ytrain_classification = np.memmap(DOSSIER_DONNEES_TEMP+'ytrain_classification.hdf5', dtype='float32', mode='r', shape=(nb_vertebres_training*48))
xvalidation_classification = np.memmap(DOSSIER_DONNEES_TEMP+'xvalidation_classification.hdf5', dtype='float32', mode='r', shape=(nb_vertebres_validation,64,64,64,1))
yvalidation_classification = np.memmap(DOSSIER_DONNEES_TEMP+'yvalidation_classification.hdf5', dtype='float32', mode='r', shape=(nb_vertebres_validation))
# Sanity check: show one central slice of an augmented sample and one label.
fast_display(xtrain_classification[2,32,:,:,0])
print(ytrain_classification[48])
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np  # NOTE(review): duplicate import, harmless
#import keras
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
nbmaxvertebres = 25  # NOTE(review): re-declared; not used in this chunk
# Parameters of the VGG adapted to 3-D.
nbf64VGG = 8#16  -- base filter count of the 3-D VGG (original VGG uses 64)
taille_input_classification = 64  # cubic input edge length
from tensorflow.keras import regularizers
REGUL_KERNEL_CLASSIF = 0    # L2 kernel regularisation weight (0 = disabled)
REGUL_ACTIVITY_CLASSIF = 0  # L1 activity regularisation weight (0 = disabled)
PATIENCE_CLASSIFICATION = 20  # early-stopping patience, in epochs
# Filter counts double at each successive VGG block.
nbf128VGG = 2*nbf64VGG
nbf256VGG = 2*nbf128VGG
nbf512VGG = 2*nbf256VGG
nbDENSE4096VGG = 256  # width of the two fully-connected layers (VGG's 4096, reduced)
def VGG16(input_size = (64,64,64,1)):
    """Build a 3-D VGG16-style network with a single linear output.

    The layer layout, names and filter counts are identical to the original
    hand-unrolled version (blocks 1-5 with 2/2/3/3/3 convolutions, each
    followed by 2x2x2 max pooling, then fc1/fc2 and a 1-unit linear head);
    the ~100 lines of copy-pasted Conv3D calls are replaced by a
    configuration-driven loop.

    Parameters
    ----------
    input_size : tuple
        Input volume shape, channels last.

    Returns
    -------
    tf.keras.Model
        The uncompiled model, named 'vgg16'.
    """
    def _conv(filters, name):
        # 3x3x3 'same' ReLU convolution with the module-level regularisers.
        return Conv3D(filters, (3, 3, 3),
                      activation='relu',
                      padding='same',
                      name=name,
                      kernel_regularizer=regularizers.l2(REGUL_KERNEL_CLASSIF),
                      activity_regularizer=regularizers.l1(REGUL_ACTIVITY_CLASSIF))

    inputs = tf.keras.layers.Input(input_size)
    x = inputs
    # (block number, filter count, number of conv layers) — classic VGG16.
    for block, filters, n_convs in ((1, nbf64VGG, 2),
                                    (2, nbf128VGG, 2),
                                    (3, nbf256VGG, 3),
                                    (4, nbf512VGG, 3),
                                    (5, nbf512VGG, 3)):
        for c in range(1, n_convs + 1):
            x = _conv(filters, 'block%d_conv%d' % (block, c))(x)
        x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), name='block%d_pool' % block)(x)
    # Classification block
    x = Flatten(name='flatten')(x)
    for fc_name in ('fc1', 'fc2'):
        x = Dense(nbDENSE4096VGG,
                  activation='relu',
                  name=fc_name,
                  kernel_regularizer=regularizers.l2(REGUL_KERNEL_CLASSIF),
                  activity_regularizer=regularizers.l1(REGUL_ACTIVITY_CLASSIF))(x)
    # Single linear unit: the vertebra label is regressed, not one-hot classified.
    x = Dense(1, activation='linear', name='predictions')(x)
    # Create model.
    model = Model(inputs, x, name='vgg16')
    return model
# Instantiate the classification network.
model_25 = VGG16(input_size = (taille_input_classification,taille_input_classification,taille_input_classification,1))
# Compile as a regression on the scalar vertebra label (loss is MSE — the
# original comment wrongly claimed categorical cross entropy).
# 'lr' is deprecated in tf.keras optimizers; 'learning_rate' is the
# supported keyword with identical behaviour.
model_25.compile(optimizer = Adam(learning_rate = 1e-5), loss = 'mse', metrics = ['accuracy'])
# Sanity check on the untrained model.  The column-vector transpose avoids the
# original (100,) - (100,1) broadcast, which printed a 100x100 matrix instead
# of 100 per-sample errors (the later 'error' computation already did this).
print(np.atleast_2d(ytrain_classification[:100]).T - model_25.predict(xtrain_classification[:100,:,:,:,:]))
model_25.summary()
# Keep the best (lowest training loss) weights on disk during training.
model_checkpoint_classification = tf.keras.callbacks.ModelCheckpoint(DOSSIER_DONNEES_RESEAUX+'backup_network3D_verse_classification_vgg16.hdf5', monitor='loss',verbose=1, save_best_only=True)
earlystopping_classification = tf.keras.callbacks.EarlyStopping(
    monitor='loss',
    min_delta=0,
    patience=PATIENCE_CLASSIFICATION,
    verbose=1,
    mode='auto',
    baseline=None,
    restore_best_weights=True
)
print(xtrain_classification.shape, ytrain_classification.shape)
# Batch size of 1 (raise according to GPU memory; keep 1 when unsure).
BS = 1
NBEPOQUESMAX = 1000
# Train; the last 200 augmented samples are held out of this fit call.
model_25.fit(
    x=xtrain_classification[:-200,:,:,:,:],
    y=ytrain_classification[:-200],
    batch_size=BS,
    epochs=NBEPOQUESMAX,
    verbose=1,
    callbacks=[model_checkpoint_classification,earlystopping_classification],
    initial_epoch=0,
    #validation_data = (xvalidation_classification, yvalidation_classification)
)
# Save for later.  The original passed a stray "w" positional argument, which
# silently mapped to Model.save's 'overwrite' parameter; the default
# (overwrite=True) gives the same effect.
model_25.save(DOSSIER_DONNEES_RESEAUX+"verse3D_CLASSIFICATION_nbf64VGG="+str(nbf64VGG)+"_taille_input_classification="+str(taille_input_classification)+".hdf5")
# Per-sample absolute error of the (now trained) model on the first 400 samples.
error = np.atleast_2d(ytrain_classification[:400]).T - model_25.predict(xtrain_classification[:400,:,:,:,:])
print(error.shape)
x=np.arange(200)  # NOTE(review): unused — kept for compatibility with later cells
plt.plot(np.abs(error))
# For vertebra recognition: load the connected components one by one (when
# large enough) and use the code below to infer the vertebra number with the
# trained VGG.
# Your loading code goes here: ....
# Inside it, use this to recognise:
# NOTE(review): seg_vertebre, Classification3D and vertebre_possible are not
# defined anywhere in this file — they must come from the loading code above,
# otherwise the next two lines raise NameError.
# NOTE(review): the model ends in a single linear unit, so argmax over the
# last axis always returns 0 — presumably a rounded regression prediction was
# intended here; verify against the training setup.
numero_label_estime = np.argmax(model_25(seg_vertebre.reshape((1,taille_input_classification,taille_input_classification,taille_input_classification,1))),axis=-1)
Classification3D[vertebre_possible == 1] = numero_label_estime
# Then display the result to check that it is coherent.
# Checkpoint / early-stopping callbacks monitoring the validation loss
# (presumably for the U-Net defined below — confirm; they are not used by the
# classification training above, which monitors 'loss').
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(DOSSIER_DONNEES_RESEAUX+'backup_network_verse.hdf5', monitor='val_loss',verbose=1, save_best_only=True)
earlystopping = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss',
    min_delta=0,
    patience=10,
    verbose=1,
    mode='auto',
    baseline=None,
    restore_best_weights=True
)
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np  # NOTE(review): duplicate import, harmless
#import keras
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
REGULARIZER = tf.keras.regularizers.l2(1e-5)  # shared kernel regulariser for the U-Net below
# U-Net filter counts: doubling per level from the module-level nbf64.
nbf128 = 2 * nbf64
nbf256 = 2 * nbf128
nbf512 = 2 * nbf256
nbf1024 = 2 * nbf512
def unet(pretrained_weights = None,input_size = (256,256,1)):
    """Build and compile a 2-D U-Net that regresses a single-channel map.

    Parameters
    ----------
    pretrained_weights : str or None
        Optional path to an HDF5 weights file loaded into the model.
    input_size : tuple
        (height, width, channels) of the input tensor.

    Returns
    -------
    tf.keras.Model
        Compiled with Adam(learning_rate=1e-4) and MSE loss; the output head
        is a linear 1x1 convolution (regression, not classification).
    """
    def _conv(filters, kernel=3):
        # Factory for the repeated Conv2D configuration: ReLU, 'same'
        # padding, he_normal init, shared L2 kernel regularizer.
        return Conv2D(filters, kernel, activation='relu', padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=REGULARIZER)

    inputs = tf.keras.layers.Input(input_size)

    # --- Contracting path: two 3x3 convs then a 2x2 max-pool per level. ---
    conv1 = _conv(nbf64)(inputs)
    conv1 = _conv(nbf64)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = _conv(nbf128)(pool1)
    conv2 = _conv(nbf128)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = _conv(nbf256)(pool2)
    conv3 = _conv(nbf256)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = _conv(nbf512)(pool3)
    conv4 = _conv(nbf512)(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # --- Bottleneck with dropout. ---
    conv5 = _conv(nbf1024)(pool4)
    conv5 = _conv(nbf1024)(conv5)
    drop5 = Dropout(0.5)(conv5)

    # --- Expanding path: upsample, 2x2 conv, concatenate the skip
    #     connection (channel axis), then two 3x3 convs per level. ---
    up6 = _conv(nbf512, 2)(UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = _conv(nbf512)(merge6)
    conv6 = _conv(nbf512)(conv6)
    up7 = _conv(nbf256, 2)(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = _conv(nbf256)(merge7)
    conv7 = _conv(nbf256)(conv7)
    up8 = _conv(nbf128, 2)(UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = _conv(nbf128)(merge8)
    conv8 = _conv(nbf128)(conv8)
    up9 = _conv(nbf64, 2)(UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = _conv(nbf64)(merge9)
    conv9 = _conv(nbf64)(conv9)
    conv9 = _conv(2)(conv9)
    # Linear 1x1 output head: one regressed channel.
    conv10 = Conv2D(1, 1, activation = 'linear')(conv9)

    model = Model(inputs = inputs, outputs = conv10)
    # FIX: tf.keras optimizers removed the deprecated `lr` alias — use
    # `learning_rate`. NOTE(review): 'accuracy' is not meaningful for a
    # linear/MSE head; kept only for backward compatibility of logged metrics.
    model.compile(optimizer = Adam(learning_rate = 1e-4), loss = 'mse', metrics = ['accuracy'])
    # model.summary()
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
# Single input channel.
model = unet(input_size = (TAILLEFINALE,TAILLEFINALE,1))
NBEPOQUESMAX = 100
# Train the 2-D U-Net slice by slice.
# NOTE(review): despite the *_classification names, these arrays are fed to
# the 2-D U-Net here — confirm they hold (N, TAILLEFINALE, TAILLEFINALE, 1)
# slices at this point in the script.
model.fit(
x=xtrain_classification,
y=ytrain_classification,
batch_size=1,
epochs=NBEPOQUESMAX,
verbose=1,
callbacks=[model_checkpoint,earlystopping],
initial_epoch=0,
validation_data = (xvalidation_classification, yvalidation_classification)
)
# NOTE(review): Model.save's second positional argument is `overwrite`; the
# string "w" only works because it is truthy. Also, saving a full model to a
# ".json" path depends on the Keras version ignoring the extension — confirm
# before relying on the reload below.
model.save(DOSSIER_DONNEES_RESEAUX+"R ="+str(R)+"linear_model.json","w")
# Reload the model that was just written (sanity check of the save path).
model=load_model(DOSSIER_DONNEES_RESEAUX+"R ="+str(R)+"linear_model.json")
def compute_dice(seg,mask):
    """Dice similarity coefficient between two binary volumes.

    Smoothed with a constant of 1.0 in numerator and denominator, so two
    empty masks score exactly 1.0 instead of dividing by zero.
    """
    smooth = 1.0
    overlap = np.sum(seg * mask)
    total = np.sum(seg) + np.sum(mask)
    return (2.0 * overlap + smooth) / (total + smooth)
# Evaluate the trained U-Net on the held-out test patients: for each test
# patient, rebuild the eroded ground-truth segmentation, predict every axial
# slice, and (optionally, currently disabled) compute the Dice score.
# NOTE(review): the original French comment said this counts the slices for
# allocating memmap files — that appears stale relative to what the code does.
# NOTE(review): indentation was reconstructed from a flattened source; verify
# the loop nesting (in particular that the per-vertebra erosion loop ends
# before the display, and that numpatient increments once per image file).
import cv2
from scipy import ndimage
# One Dice score slot per test patient (stays zero while L902-903 are commented out).
liste_dices_patients_tests = np.zeros((nb_patients_test,))
numpatient = 0
for dirname, _, filenames in os.walk(DossierDesVertebres):
    for filename in filenames:
        # Keep only the raw images; "_seg.nii.gz" files are the label volumes.
        if "nii.gz" in filename and "_seg.nii.gz" not in filename:
            # Only the last nb_patients_test patients form the test set.
            if numpatient >= (nb_total_de_patients_disponibles - nb_patients_test) and numpatient < (nb_total_de_patients_disponibles):
                print("patient numero ",numpatient)
                nomseg = DossierDesVertebres+filename[:-7]+"_seg.nii.gz"
                GT = nib.load(nomseg).get_fdata()
                [sxseg,syseg,szseg] = GT.shape
                seg = np.zeros((sxseg,syseg,szseg))
                # Rebuild the binary target: each vertebra label, eroded R
                # times (same preprocessing the network was trained on).
                for i in range(int(np.min(GT)) + 1, int(np.max(GT)) + 1):
                    VT = np.zeros((sxseg,syseg,szseg))
                    VT = np.where(GT == i, 1, 0)
                    VT = ndimage.binary_erosion(VT,iterations = R)
                    seg += VT
                print("VT érodée, VT")
                fast_display(seg[:,:,int(szseg/2)], GT[:,:,int(szseg/2)])
                print("Dimensions [SEG] = ",[sxseg,syseg,szseg], " et intensite min = ",np.min(seg), " et intensite max = ",np.max(seg))
                nomimg = DossierDesVertebres+filename
                img = nib.load(nomimg).get_fdata()
                [sx,sy,sz] = img.shape
                print("Dimensions [IMG] = ",[sx,sy,sz], " et intensite min = ",np.min(img), " et intensite max = ",np.max(img))
                # if sx==sxseg and sy==syseg and sz==szseg:
                Prediction3DProbabilites = np.zeros((TAILLEFINALE,TAILLEFINALE,sz))
                VT3D = np.zeros((TAILLEFINALE,TAILLEFINALE,sz))
                # Predict slice by slice along the axial (z) axis.
                for z in range(0,sz):
                    img2D = np.copy(img[:,:,z])
                    seg2D = np.copy(seg[:,:,z])
                    seg2D[seg2D > 0] = 1
                    # Resize so the longest side equals TAILLEFINALE while
                    # preserving the aspect ratio; the rest is zero-padded.
                    if sx >= sy:
                        SXFINAL = TAILLEFINALE
                        SYFINAL = int(float(TAILLEFINALE) * float(sy) / float(sx))
                    else:
                        SYFINAL = TAILLEFINALE
                        SXFINAL = int(float(TAILLEFINALE) * float(sx) / float(sy))
                    # Normalise the slice intensities to uint8 [0, 255].
                    extraction_img = np.copy(img2D)
                    extraction_img = np.uint8(255.0*(extraction_img - np.min(extraction_img))/(np.max(extraction_img) - np.min(extraction_img)))
                    coupeseg = np.copy(seg2D)
                    coupeseg[coupeseg > 0] = 1
                    # Values at 1 are scaled to 255 so the thumbnail is visible under macOS.
                    extraction_seg = np.uint8(255.0*np.float32(coupeseg))
                    # Replicate to 3 channels for the RGB resize path.
                    imcouleur = np.zeros((sx,sy,3),dtype=np.uint8)
                    imcouleur[:,:,0] = extraction_img[:,:]
                    imcouleur[:,:,1] = extraction_img[:,:]
                    imcouleur[:,:,2] = extraction_img[:,:]
                    imageresized = np.array(cv2.resize(np.float32(Image.fromarray(imcouleur,'RGB')), (SYFINAL,SXFINAL), interpolation = cv2.INTER_LINEAR))
                    imagefinale = np.zeros((TAILLEFINALE,TAILLEFINALE,3),dtype=np.uint8)
                    imagefinale[:SXFINAL,:SYFINAL,:] = imageresized[:,:,:]
                    segcouleur = np.zeros((sx,sy,3),dtype=np.uint8)
                    segcouleur[:,:,0] = extraction_seg[:,:]
                    segcouleur[:,:,1] = extraction_seg[:,:]
                    segcouleur[:,:,2] = extraction_seg[:,:]
                    segresized = np.array(cv2.resize(np.float32(Image.fromarray(segcouleur,'RGB')), (SYFINAL,SXFINAL), interpolation = cv2.INTER_LINEAR))
                    # Resizing can introduce intermediate grey levels, so
                    # threshold back to exactly {0, 255}.
                    segresized[segresized >=128] = 255
                    segresized[segresized < 128] = 0
                    segfinale = np.zeros((TAILLEFINALE,TAILLEFINALE,3),dtype=np.uint8)
                    segfinale[:SXFINAL,:SYFINAL,:] = segresized[:,:,:]
                    coupeIRM = imagefinale[:,:,0]
                    # Network inference on the single-channel padded slice.
                    Prediction3DProbabilites[:,:,z] = model.predict(coupeIRM.reshape((1,TAILLEFINALE,TAILLEFINALE,1))).reshape((TAILLEFINALE,TAILLEFINALE))
                    VT3D[:,:,z] = np.where(segfinale[:,:,0] > 128,1,0)
                    # Visual check on the middle slice only.
                    if z in [int(sz/2)]:
                        print("affichage comparaison sachant le max de proba a :",np.max(Prediction3DProbabilites[:,:,z]))
                        toplot1 = np.copy(np.round(Prediction3DProbabilites[:,:,z]))
                        print(toplot1[0,0])
                        # Pin two corner pixels to 0 and 1 so both display
                        # images share the same colour scale.
                        toplot1[0,0] = 0
                        toplot1[0,1] = 1
                        toplot2 = np.copy(VT3D[:,:,z])
                        toplot2[0,0] = 0
                        toplot2[0,1] = 1
                        print("entrée,prédiction,VT")
                        fast_display(imagefinale[:,:,0],toplot1,toplot2)
                # NOTE(review): the actual Dice computation is disabled, so
                # liste_dices_patients_tests remains all zeros.
                #Prediction3DProbabilitesBinaire = np.round(Prediction3DProbabilites)
                #liste_dices_patients_tests[numpatient - (nb_total_de_patients_disponibles - nb_patients_test)] = compute_dice(Prediction3DProbabilitesBinaire,VT3D)
            numpatient+=1
# NOTE(review): nb_slices_training / nb_slices_validation must be defined
# earlier in the file — confirm they are in scope here.
print("nb_slices_training = ",nb_slices_training)
print("nb_slices_validation = ",nb_slices_validation)
print(liste_dices_patients_tests)
print(np.mean(liste_dices_patients_tests))